猫狗大战毕业项目

探索性可视化

In [1]:
from os import listdir
from os.path import isfile, join
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import cv2

%matplotlib inline
/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/matplotlib/__init__.py:1067: UserWarning: Duplicate key in file "/home/ubuntu/.config/matplotlib/matplotlibrc", line #2
  (fname, cnt))
/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/matplotlib/__init__.py:1067: UserWarning: Duplicate key in file "/home/ubuntu/.config/matplotlib/matplotlibrc", line #3
  (fname, cnt))
In [2]:
# Count training images per class and visualize the class balance.
# File names look like "cat.123.jpg" / "dog.456.jpg", so the first three
# characters identify the class.
img_files = [f for f in listdir('./train') if isfile(join('./train', f))]
img_data = defaultdict(list)
for img_file in img_files:
    img_data[img_file[:3]].append(img_file)

# Count via len() directly; the original built an intermediate DataFrame,
# which is unnecessary and only works when both lists have equal length.
img_data = pd.Series([len(img_data['cat']), len(img_data['dog'])], index=['cat', 'dog'])
img_data.plot(kind='bar')
Out[2]:
<matplotlib.axes._subplots.AxesSubplot at 0x7f542f5cf358>

训练集中猫和狗的图片各占一半,分别为12500张。

数据预处理

通过研究清单可以知道ImageNet的1000种分类中狗和猫对应的标签,可以根据这个信息快速构建自己的异常值检测器。

In [2]:
from keras.applications.xception import Xception
from keras.applications.xception import preprocess_input as xception_preprocess
from keras.applications.xception import decode_predictions as xception_decode

from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input as inception_preprocess
from keras.applications.inception_v3 import decode_predictions as inception_decode

from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input as resnet_preprocess
from keras.applications.resnet50 import decode_predictions as resnet_decode

from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input as vgg16_preprocess
from keras.applications.vgg16 import decode_predictions as vgg16_decode

from keras.preprocessing import image
/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
Using TensorFlow backend.
In [4]:
# WordNet synset IDs of the ImageNet (ILSVRC-1000) classes treated as dogs.
# A prediction whose class ID appears here counts as "dog found".
dogs = [
    'n02085620','n02085782','n02085936','n02086079', 
    'n02086240','n02086646','n02086910','n02087046',
    'n02087394','n02088094','n02088238','n02088364',
    'n02088466','n02088632','n02089078','n02089867',
    'n02089973','n02090379','n02090622','n02090721',
    'n02091032','n02091134','n02091244','n02091467',
    'n02091635','n02091831','n02092002','n02092339',
    'n02093256','n02093428','n02093647','n02093754',
    'n02093859','n02093991','n02094114','n02094258',
    'n02094433','n02095314','n02095570','n02095889',
    'n02096051','n02096177','n02096294','n02096437',
    'n02096585','n02097047','n02097130','n02097209',
    'n02097298','n02097474','n02097658','n02098105',
    'n02098286','n02098413','n02099267','n02099429',
    'n02099601','n02099712','n02099849','n02100236',
    'n02100583','n02100735','n02100877','n02101006',
    'n02101388','n02101556','n02102040','n02102177',
    'n02102318','n02102480','n02102973','n02104029',
    'n02104365','n02105056','n02105162','n02105251',
    'n02105412','n02105505','n02105641','n02105855',
    'n02106030','n02106166','n02106382','n02106550',
    'n02106662','n02107142','n02107312','n02107574',
    'n02107683','n02107908','n02108000','n02108089',
    'n02108422','n02108551','n02108915','n02109047',
    'n02109525','n02109961','n02110063','n02110185',
    'n02110341','n02110627','n02110806','n02110958',
    'n02111129','n02111277','n02111500','n02111889',
    'n02112018','n02112137','n02112350','n02112706',
    'n02113023','n02113186','n02113624','n02113712',
    'n02113799','n02113978']

# WordNet synset IDs of the ImageNet classes treated as cats.
cats = [
    'n02123045','n02123159','n02123394','n02123597',
    'n02124075','n02125311','n02127052',
]
In [5]:
# In-place shuffle with a fixed seed, so the samples drawn below
# (img_files[:1000], [:5000], [:10000]) are reproducible across runs.
np.random.RandomState(42).shuffle(img_files)

def abnormals_detect(detector, img_files, img_size, top, preprocess, decode):
    """Return file names whose top-``top`` ImageNet predictions contain
    neither a dog nor a cat synset (candidate outlier images).

    Parameters
    ----------
    detector : Keras model with the full ImageNet classifier head.
    img_files : iterable of file names under ./train.
    img_size : (width, height) target size expected by the detector.
    top : number of top predictions to scan per image.
    preprocess, decode : the model-specific ``preprocess_input`` /
        ``decode_predictions`` functions.
    """
    # Build the lookup once per call: set membership is O(1), while the
    # original tested `pred[0] in dogs` against a 100+ element list.
    dog_cat_ids = set(dogs) | set(cats)
    abnormals = []
    for img_file in img_files:
        img = image.load_img('./train/'+img_file, target_size=img_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess(x)

        preds = detector.predict(x)
        preds = decode(preds, top=top)
        # any() short-circuits on the first dog/cat hit; the original
        # loop kept scanning all `top` predictions after a match.
        if not any(pred[0] in dog_cat_ids for pred in preds[0]):
            abnormals.append(img_file)
    return abnormals

def display_image(img_paths):
    """Render each training image in *img_paths* inline, in RGB order."""
    for img_path in img_paths:
        # OpenCV loads BGR; convert before handing off to matplotlib.
        bgr = cv2.imread('./train/'+img_path)
        plt.imshow(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
        plt.show()
In [6]:
# First detector: Xception (highest-accuracy Keras application model),
# run on a 1000-image sample scanning the Top-5 predictions.
xception_detector = Xception(weights='imagenet')
xception_abnormals = abnormals_detect(xception_detector, img_files[:1000], (299, 299), 5, xception_preprocess, xception_decode)
print('{} abnormals detected: {}'.format(len(xception_abnormals), xception_abnormals))
display_image(xception_abnormals)
15 abnormals detected: ['cat.6213.jpg', 'cat.7932.jpg', 'cat.5168.jpg', 'cat.1757.jpg', 'cat.796.jpg', 'cat.5092.jpg', 'cat.11706.jpg', 'cat.9377.jpg', 'dog.10297.jpg', 'dog.3629.jpg', 'cat.12493.jpg', 'dog.11299.jpg', 'cat.4982.jpg', 'cat.1204.jpg', 'cat.7545.jpg']

根据Keras文档,准确率最高的模型是Xception,其Top-1和Top-5准确率分别为0.790和0.945。先以这个模型作为检测器,以1000张混洗过的图片作为样本,看看Top-5下都有哪些图片被识别为非猫非狗,检测出来有15张异常值,误报率较高,但的确也发现了一些有问题的图片,比如有1张就是非猫非狗。下面将Top值提高到10看看。

In [7]:
# Same 1000-image sample, Top-10: a looser criterion should cut the
# false-positive rate.
xception_abnormals = abnormals_detect(xception_detector, img_files[:1000], (299, 299), 10, xception_preprocess, xception_decode)
print('{} abnormals detected: {}'.format(len(xception_abnormals), xception_abnormals))
display_image(xception_abnormals)
6 abnormals detected: ['cat.5168.jpg', 'cat.9377.jpg', 'dog.10297.jpg', 'cat.12493.jpg', 'dog.11299.jpg', 'cat.7545.jpg']

误报率仍然比较高,下面再分别试验下Top-20和Top-30。

In [8]:
# Same sample, Top-20.
xception_abnormals = abnormals_detect(xception_detector, img_files[:1000], (299, 299), 20, xception_preprocess, xception_decode)
print('{} abnormals detected: {}'.format(len(xception_abnormals), xception_abnormals))
display_image(xception_abnormals)
2 abnormals detected: ['dog.10297.jpg', 'dog.11299.jpg']
In [9]:
# Same sample, Top-30.
xception_abnormals = abnormals_detect(xception_detector, img_files[:1000], (299, 299), 30, xception_preprocess, xception_decode)
print('{} abnormals detected: {}'.format(len(xception_abnormals), xception_abnormals))
display_image(xception_abnormals)
1 abnormals detected: ['dog.11299.jpg']

从得到的结果可以看出Top值为30时检测到了1张图片,该图片是一个商标,既不是猫也不是狗,下面把样本数量提高到5000,再看看检测结果。

In [10]:
# Top-30 criterion over a larger 5000-image sample.
xception_abnormals = abnormals_detect(xception_detector, img_files[:5000], (299, 299), 30, xception_preprocess, xception_decode)
print('{} abnormals detected: {}'.format(len(xception_abnormals), xception_abnormals))
display_image(xception_abnormals)
12 abnormals detected: ['dog.11299.jpg', 'cat.2975.jpg', 'cat.8383.jpg', 'dog.5604.jpg', 'cat.8470.jpg', 'dog.11437.jpg', 'cat.5974.jpg', 'dog.6475.jpg', 'dog.10237.jpg', 'dog.10801.jpg', 'cat.5071.jpg', 'cat.10029.jpg']

上面5000个样本的检测结果显示的确有一些图片既不是猫也不是狗,但还是有一些正常的图片被误报了,不算太理想,把Top值提高到60,样本数量提高到10000再看看。

In [11]:
# Top-60 criterion over 10000 images.
xception_abnormals = abnormals_detect(xception_detector, img_files[:10000], (299, 299), 60, xception_preprocess, xception_decode)
print('{} abnormals detected: {}'.format(len(xception_abnormals), xception_abnormals))
display_image(xception_abnormals)
9 abnormals detected: ['dog.11299.jpg', 'dog.5604.jpg', 'cat.8470.jpg', 'cat.5974.jpg', 'dog.10801.jpg', 'cat.10029.jpg', 'cat.8456.jpg', 'dog.10161.jpg', 'dog.4367.jpg']

从10000张图片中检测出来9张图片,其中有很多是非猫非狗的异常图片,虽然也有一部分图片被误报了,但是都属于图片内容比较复杂或者质量不高的,因此Top值为60是一个比较理想的值,下面结合4种模型,对整个训练集图片进行检测,然后将结果合并起来得到最终的异常值检测结果。

In [12]:
# Final pass: Xception over the entire training set, Top-60.
xception_abnormals = abnormals_detect(xception_detector, img_files, (299, 299), 60, xception_preprocess, xception_decode)
print('{} abnormals detected by Xception'.format(len(xception_abnormals)))
39 abnormals detected by Xception
In [14]:
# ResNet50 detector over the entire training set (224x224 input), Top-60.
resnet_detector = ResNet50(weights='imagenet')
resnet_abnormals = abnormals_detect(resnet_detector, img_files, (224, 224), 60, resnet_preprocess, resnet_decode)
print('{} abnormals detected by ResNet'.format(len(resnet_abnormals)))
34 abnormals detected by ResNet
In [15]:
# InceptionV3 detector over the entire training set (299x299 input), Top-60.
inception_detector = InceptionV3(weights='imagenet')
inception_abnormals = abnormals_detect(inception_detector, img_files, (299, 299), 60, inception_preprocess, inception_decode)
print('{} abnormals detected by Inception'.format(len(inception_abnormals)))
Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels.h5
96116736/96112376 [==============================] - 1s 0us/step
23 abnormals detected by Inception
In [16]:
# VGG16 detector over the entire training set (224x224 input), Top-60.
vgg16_detector = VGG16(weights='imagenet')
vgg16_abnormals = abnormals_detect(vgg16_detector, img_files, (224, 224), 60, vgg16_preprocess, vgg16_decode)
print('{} abnormals detected by VGG16'.format(len(vgg16_abnormals)))
Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5
553467904/553467096 [==============================] - 6s 0us/step
93 abnormals detected by VGG16
In [19]:
# Merge the four detectors' outputs into a single set of suspect images.
total_abnormals = (set(xception_abnormals)
                   | set(resnet_abnormals)
                   | set(inception_abnormals)
                   | set(vgg16_abnormals))

print('{} abnormals detected: {}'.format(len(total_abnormals), list(total_abnormals)))
118 abnormals detected: ['cat.9494.jpg', 'dog.3074.jpg', 'dog.10123.jpg', 'cat.2691.jpg', 'cat.7314.jpg', 'cat.6614.jpg', 'cat.8383.jpg', 'cat.9391.jpg', 'cat.596.jpg', 'cat.3758.jpg', 'dog.8450.jpg', 'dog.1259.jpg', 'cat.3739.jpg', 'dog.9188.jpg', 'cat.8138.jpg', 'cat.8456.jpg', 'dog.10155.jpg', 'dog.6405.jpg', 'cat.9983.jpg', 'dog.1895.jpg', 'dog.4127.jpg', 'cat.2845.jpg', 'dog.2614.jpg', 'dog.5602.jpg', 'dog.4367.jpg', 'dog.6733.jpg', 'dog.1546.jpg', 'dog.9705.jpg', 'cat.10365.jpg', 'cat.8921.jpg', 'dog.5604.jpg', 'dog.2422.jpg', 'cat.5954.jpg', 'cat.7564.jpg', 'cat.4577.jpg', 'dog.12148.jpg', 'dog.1625.jpg', 'dog.806.jpg', 'cat.9624.jpg', 'cat.3672.jpg', 'dog.3341.jpg', 'cat.4338.jpg', 'cat.7377.jpg', 'dog.4507.jpg', 'cat.2520.jpg', 'cat.12272.jpg', 'dog.11248.jpg', 'cat.6345.jpg', 'dog.10237.jpg', 'cat.4833.jpg', 'dog.5618.jpg', 'dog.8898.jpg', 'cat.10700.jpg', 'dog.4218.jpg', 'cat.11504.jpg', 'cat.11373.jpg', 'dog.10801.jpg', 'dog.10190.jpg', 'dog.10225.jpg', 'cat.10712.jpg', 'dog.10161.jpg', 'cat.5418.jpg', 'dog.10747.jpg', 'cat.10536.jpg', 'cat.6402.jpg', 'cat.12203.jpg', 'dog.11266.jpg', 'cat.4308.jpg', 'dog.630.jpg', 'cat.7703.jpg', 'cat.3123.jpg', 'cat.712.jpg', 'cat.9171.jpg', 'dog.1773.jpg', 'dog.12376.jpg', 'cat.4272.jpg', 'cat.2337.jpg', 'dog.9246.jpg', 'cat.10029.jpg', 'cat.5974.jpg', 'cat.7968.jpg', 'dog.2339.jpg', 'cat.8470.jpg', 'cat.5351.jpg', 'cat.6817.jpg', 'dog.12331.jpg', 'dog.6475.jpg', 'cat.2939.jpg', 'dog.8736.jpg', 'cat.5880.jpg', 'cat.11184.jpg', 'cat.10277.jpg', 'dog.11465.jpg', 'cat.12476.jpg', 'cat.12493.jpg', 'dog.1194.jpg', 'cat.10636.jpg', 'cat.146.jpg', 'cat.2433.jpg', 'cat.9520.jpg', 'dog.10842.jpg', 'cat.12424.jpg', 'dog.7729.jpg', 'dog.3497.jpg', 'dog.3889.jpg', 'cat.4688.jpg', 'dog.1308.jpg', 'dog.11299.jpg', 'cat.7487.jpg', 'cat.8755.jpg', 'dog.9517.jpg', 'cat.11039.jpg', 'dog.6725.jpg', 'dog.10551.jpg', 'cat.2038.jpg', 'dog.11437.jpg', 'dog.7772.jpg', 'dog.10637.jpg']

综合四种模型输出的结果得到118张图片,通过人工分析发现这些被判定为异常值的图片有以下几种类型:

  1. 图片中有猫狗,但尺寸太小,不够清晰;
  2. 图片中有猫狗,但内容比较复杂;
  3. 图片非猫非狗;
  4. 图片是猫和狗的卡通形象;

我认为情况1应该保留,因为这里不能假定所有输入模型的图片都是清晰的,总会有一些模糊的图片,模型应当对这样的图片具有一定的健壮性;情况2也应该保留,模型也应该对一些复杂的图片具有一定的健壮性;情况3应该删除;情况4则比较主观,但我训练该模型的目的是为了识别真实世界的猫狗,所以我选择删除。最终被删除的有38张图片。

In [21]:
import os

# Images judged (by manual review) to be genuine outliers: non-cat/non-dog
# content or cartoon renderings. Blurry/cluttered but real photos are kept.
deleted = [
    'dog.10190.jpg', 'cat.7377.jpg', 'cat.5418.jpg', 'dog.8898.jpg', 'dog.10747.jpg',
    'dog.6475.jpg', 'cat.11184.jpg', 'cat.4833.jpg', 'dog.1895.jpg', 'cat.10029.jpg',
    'dog.1308.jpg', 'dog.8736.jpg', 'cat.12272.jpg', 'dog.11299.jpg', 'cat.7968.jpg',
    'cat.2939.jpg', 'dog.4367.jpg', 'cat.5351.jpg', 'dog.10237.jpg', 'cat.8456.jpg',
    'dog.5604.jpg', 'dog.1773.jpg', 'dog.3889.jpg', 'cat.9171.jpg', 'cat.4338.jpg',
    'cat.7564.jpg', 'dog.9517.jpg', 'dog.12376.jpg', 'cat.4688.jpg', 'dog.9188.jpg',
    'dog.10801.jpg', 'dog.1194.jpg', 'dog.10161.jpg', 'dog.1259.jpg', 'cat.10712.jpg',
    'dog.2614.jpg', 'cat.8470.jpg', 'cat.3672.jpg',
]

# Loop variable renamed from `image`, which shadowed the
# keras.preprocessing.image module imported earlier in this notebook.
for img_name in deleted:
    os.remove('./train/'+img_name)
In [32]:
# Re-scan ./train after the cleanup and recount the images per class.
img_files = [f for f in listdir('./train') if isfile(join('./train', f))]
img_data = defaultdict(list)
for name in img_files:
    img_data[name[:3]].append(name)

print('{} in total, cat {}, dog {}'.format(len(img_files), len(img_data['cat']), len(img_data['dog'])))
24962 in total, cat 12483, dog 12479

清洗后的训练集图片有24962张,其中猫有12483张,狗有12479张。

执行过程

创建目录结构

为了方便之后使用Keras的ImageDataGenerator,需要创建一个新文件夹trains,然后把猫和狗的照片分别放到其下的子文件夹cats和dogs中,为了节约磁盘空间,这里使用符号连接来实现。

In [3]:
import os
import shutil

# Split file names by class prefix ("cat." / "dog.").
image_files = os.listdir('./train')
cat_images = filter(lambda x: x[:3] == 'cat', image_files)
dog_images = filter(lambda x: x[:3] == 'dog', image_files)

def mkdir_from_scratch(dir_name):
    """Create *dir_name* as an empty directory, removing any previous copy."""
    if os.path.exists(dir_name):
        shutil.rmtree(dir_name)
    os.mkdir(dir_name)

# ./trains/{cats,dogs} feeds ImageDataGenerator.flow_from_directory with
# one sub-directory per class; symlinks avoid duplicating the image data.
mkdir_from_scratch('./trains')
os.mkdir('./trains/cats')
os.mkdir('./trains/dogs')

# ./tests/test wraps the flat test folder in the single-subdir layout
# flow_from_directory expects.
mkdir_from_scratch('./tests')
os.symlink('../test', './tests/test')

# Loop variables renamed from `image`, which shadowed the
# keras.preprocessing.image module used elsewhere in this notebook.
for cat_file in cat_images:
    os.symlink('../../train/'+cat_file, './trains/cats/'+cat_file)

for dog_file in dog_images:
    os.symlink('../../train/'+dog_file, './trains/dogs/'+dog_file)

导出特征向量

In [3]:
from keras.layers.core import Lambda
from keras.layers import Input, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
import h5py

def output_gap_features(model_class, image_size, preprocess_func, model_name):
    """Export global-average-pooled bottleneck features to an HDF5 file.

    Parameters
    ----------
    model_class : a keras.applications constructor (e.g. Xception).
    image_size : (width, height) the model expects.
    preprocess_func : the matching ``preprocess_input`` function.
    model_name : suffix for ``bottleneck_features_<model_name>.h5``.
    """
    width, height = image_size
    # Input -> model-specific preprocessing -> base CNN (no classifier
    # head) -> GAP: one fixed-length feature vector per image.
    x = Input((height, width, 3))
    x = Lambda(preprocess_func)(x)
    base_model = model_class(input_tensor=x, weights='imagenet', include_top=False)
    model = Model(base_model.input, GlobalAveragePooling2D()(base_model.output))

    # shuffle=False keeps feature rows aligned with train_generator.classes.
    gen = ImageDataGenerator()
    train_generator = gen.flow_from_directory('./trains', image_size, shuffle=False, batch_size=64)
    test_generator = gen.flow_from_directory('./tests', image_size, shuffle=False, batch_size=64, class_mode=None)

    # Ceil division for the step count: the original `samples//64 + 1`
    # requests one extra (duplicated) batch whenever samples is an exact
    # multiple of 64.
    train_steps = -(-train_generator.samples // 64)
    test_steps = -(-test_generator.samples // 64)
    train = model.predict_generator(train_generator, train_steps, verbose=1)
    test = model.predict_generator(test_generator, test_steps, verbose=1)

    # Explicit 'w' mode: overwrite cleanly on re-run instead of relying on
    # h5py's deprecated default append mode (which fails if the datasets
    # already exist).
    with h5py.File('bottleneck_features_{}.h5'.format(model_name), 'w') as h:
        h.create_dataset('train', data=train)
        h.create_dataset('test', data=test)
        h.create_dataset('label', data=train_generator.classes)

    print('output gap features for {} finished'.format(model_name))
In [28]:
# Export GAP bottleneck features for ResNet50 (224x224 input).
output_gap_features(ResNet50, (224, 224), resnet_preprocess, 'resnet')
Found 24962 images belonging to 2 classes.
Found 12500 images belonging to 1 classes.
391/391 [==============================] - 246s 629ms/step
196/196 [==============================] - 122s 622ms/step
output gap features for resnet finished
In [29]:
# Export GAP bottleneck features for Xception (299x299 input).
output_gap_features(Xception, (299, 299), xception_preprocess, 'xception')
Found 24962 images belonging to 2 classes.
Found 12500 images belonging to 1 classes.
391/391 [==============================] - 536s 1s/step
196/196 [==============================] - 266s 1s/step
output gap features for xception finished
In [30]:
# Export GAP bottleneck features for InceptionV3 (299x299 input).
output_gap_features(InceptionV3, (299, 299), inception_preprocess, 'inception')
Found 24962 images belonging to 2 classes.
Found 12500 images belonging to 1 classes.
391/391 [==============================] - 355s 908ms/step
196/196 [==============================] - 176s 897ms/step
output gap features for inception finished
In [31]:
# Export GAP bottleneck features for VGG16 (224x224 input).
output_gap_features(VGG16, (224, 224), vgg16_preprocess, 'vgg16')
Found 24962 images belonging to 2 classes.
Found 12500 images belonging to 1 classes.
391/391 [==============================] - 267s 682ms/step
196/196 [==============================] - 132s 671ms/step
output gap features for vgg16 finished

载入特征向量

In [4]:
from sklearn.utils import shuffle

# Seed NumPy's global RNG so sklearn's shuffle below is reproducible.
np.random.seed(42)

X_train = []
X_test = []
feature_files = [
    'bottleneck_features_inception.h5', 
    'bottleneck_features_resnet.h5', 
    'bottleneck_features_vgg16.h5', 
    'bottleneck_features_xception.h5'
]

for h5_file in feature_files:
    with h5py.File(h5_file, 'r') as h:
        X_train.append(np.array(h['train']))
        X_test.append(np.array(h['test']))
        # NOTE(review): y_train is overwritten each pass — this assumes
        # every file stores labels in the same order (true here because
        # the export generators ran with shuffle=False) — verify if the
        # export pipeline changes.
        y_train = np.array(h['label'])

# Concatenate the four models' feature vectors along the feature axis.
X_train = np.concatenate(X_train, axis=1)
X_test = np.concatenate(X_test, axis=1)

# Shuffle features and labels together before fitting.
X_train, y_train = shuffle(X_train, y_train)
In [5]:
# Sanity check: train/test row counts and the concatenated feature width.
print(X_train.shape, X_test.shape, y_train.shape)
(24962, 6656) (12500, 6656) (24962,)

构建模型

In [6]:
from keras.layers import Dense
from keras.layers import Dropout
from keras.optimizers import Adadelta
In [11]:
# Logistic-regression head: a single sigmoid unit over the concatenated
# bottleneck features.
input_tensor = Input(X_train.shape[1:])
output = Dense(1, activation='sigmoid')(input_tensor)
model = Model(input_tensor, output)
model.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])

训练模型

In [12]:
# Train the linear head; hold out 20% of the shuffled features for validation.
hist = model.fit(X_train, y_train, batch_size=64, epochs=100, validation_split=0.2)
Train on 19969 samples, validate on 4993 samples
Epoch 1/100
19969/19969 [==============================] - 4s 208us/step - loss: 0.0521 - acc: 0.9824 - val_loss: 0.0189 - val_acc: 0.9932
Epoch 2/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0169 - acc: 0.9944 - val_loss: 0.0190 - val_acc: 0.9938
Epoch 3/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0122 - acc: 0.9963 - val_loss: 0.0150 - val_acc: 0.9954
Epoch 4/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0088 - acc: 0.9973 - val_loss: 0.0180 - val_acc: 0.9946
Epoch 5/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0066 - acc: 0.9979 - val_loss: 0.0162 - val_acc: 0.9950
Epoch 6/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0056 - acc: 0.9982 - val_loss: 0.0151 - val_acc: 0.9952
Epoch 7/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0044 - acc: 0.9989 - val_loss: 0.0164 - val_acc: 0.9948
Epoch 8/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0038 - acc: 0.9991 - val_loss: 0.0151 - val_acc: 0.9946
Epoch 9/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0034 - acc: 0.9991 - val_loss: 0.0199 - val_acc: 0.9942
Epoch 10/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0027 - acc: 0.9993 - val_loss: 0.0204 - val_acc: 0.9946
Epoch 11/100
19969/19969 [==============================] - 1s 63us/step - loss: 0.0022 - acc: 0.9996 - val_loss: 0.0294 - val_acc: 0.9928
Epoch 12/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0018 - acc: 0.9997 - val_loss: 0.0179 - val_acc: 0.9954
Epoch 13/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0016 - acc: 0.9998 - val_loss: 0.0184 - val_acc: 0.9946
Epoch 14/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0012 - acc: 0.9998 - val_loss: 0.0186 - val_acc: 0.9942
Epoch 15/100
19969/19969 [==============================] - 1s 64us/step - loss: 0.0011 - acc: 0.9998 - val_loss: 0.0174 - val_acc: 0.9952
Epoch 16/100
19969/19969 [==============================] - 1s 64us/step - loss: 8.7785e-04 - acc: 0.9998 - val_loss: 0.0195 - val_acc: 0.9952
Epoch 17/100
19969/19969 [==============================] - 1s 64us/step - loss: 7.9211e-04 - acc: 0.9998 - val_loss: 0.0198 - val_acc: 0.9952
Epoch 18/100
19969/19969 [==============================] - 1s 64us/step - loss: 7.2262e-04 - acc: 0.9999 - val_loss: 0.0190 - val_acc: 0.9944
Epoch 19/100
19969/19969 [==============================] - 1s 64us/step - loss: 6.2770e-04 - acc: 0.9999 - val_loss: 0.0190 - val_acc: 0.9952
Epoch 20/100
19969/19969 [==============================] - 1s 64us/step - loss: 6.2178e-04 - acc: 0.9999 - val_loss: 0.0192 - val_acc: 0.9954
Epoch 21/100
19969/19969 [==============================] - 1s 64us/step - loss: 3.6994e-04 - acc: 0.9999 - val_loss: 0.0205 - val_acc: 0.9948
Epoch 22/100
19969/19969 [==============================] - 1s 64us/step - loss: 3.2088e-04 - acc: 0.9999 - val_loss: 0.0209 - val_acc: 0.9950
Epoch 23/100
19969/19969 [==============================] - 1s 64us/step - loss: 3.8191e-04 - acc: 0.9999 - val_loss: 0.0240 - val_acc: 0.9940
Epoch 24/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.2779e-04 - acc: 1.0000 - val_loss: 0.0200 - val_acc: 0.9946
Epoch 25/100
19969/19969 [==============================] - 1s 64us/step - loss: 3.3915e-04 - acc: 0.9999 - val_loss: 0.0210 - val_acc: 0.9948
Epoch 26/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.0842e-04 - acc: 1.0000 - val_loss: 0.0216 - val_acc: 0.9948
Epoch 27/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.8142e-04 - acc: 1.0000 - val_loss: 0.0213 - val_acc: 0.9950
Epoch 28/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.6501e-04 - acc: 1.0000 - val_loss: 0.0235 - val_acc: 0.9948
Epoch 29/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.4160e-04 - acc: 1.0000 - val_loss: 0.0240 - val_acc: 0.9950
Epoch 30/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.7213e-04 - acc: 1.0000 - val_loss: 0.0229 - val_acc: 0.9946
Epoch 31/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.1460e-04 - acc: 1.0000 - val_loss: 0.0222 - val_acc: 0.9946
Epoch 32/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.5205e-04 - acc: 0.9999 - val_loss: 0.0236 - val_acc: 0.9946
Epoch 33/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.0125e-04 - acc: 1.0000 - val_loss: 0.0379 - val_acc: 0.9922
Epoch 34/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.1855e-04 - acc: 1.0000 - val_loss: 0.0235 - val_acc: 0.9944
Epoch 35/100
19969/19969 [==============================] - 1s 64us/step - loss: 9.8410e-05 - acc: 1.0000 - val_loss: 0.0226 - val_acc: 0.9946
Epoch 36/100
19969/19969 [==============================] - 1s 65us/step - loss: 8.0466e-05 - acc: 1.0000 - val_loss: 0.0230 - val_acc: 0.9944
Epoch 37/100
19969/19969 [==============================] - 1s 64us/step - loss: 9.2596e-05 - acc: 1.0000 - val_loss: 0.0232 - val_acc: 0.9944
Epoch 38/100
19969/19969 [==============================] - 1s 64us/step - loss: 7.5436e-05 - acc: 1.0000 - val_loss: 0.0238 - val_acc: 0.9944
Epoch 39/100
19969/19969 [==============================] - 1s 65us/step - loss: 7.7657e-05 - acc: 1.0000 - val_loss: 0.0247 - val_acc: 0.9948
Epoch 40/100
19969/19969 [==============================] - 1s 65us/step - loss: 6.9785e-05 - acc: 1.0000 - val_loss: 0.0244 - val_acc: 0.9938
Epoch 41/100
19969/19969 [==============================] - 1s 65us/step - loss: 6.2306e-05 - acc: 1.0000 - val_loss: 0.0241 - val_acc: 0.9946
Epoch 42/100
19969/19969 [==============================] - 1s 65us/step - loss: 5.8215e-05 - acc: 1.0000 - val_loss: 0.0232 - val_acc: 0.9940
Epoch 43/100
19969/19969 [==============================] - 1s 65us/step - loss: 5.6832e-05 - acc: 1.0000 - val_loss: 0.0248 - val_acc: 0.9944
Epoch 44/100
19969/19969 [==============================] - 1s 66us/step - loss: 5.3146e-05 - acc: 1.0000 - val_loss: 0.0237 - val_acc: 0.9944
Epoch 45/100
19969/19969 [==============================] - 1s 65us/step - loss: 5.1042e-05 - acc: 1.0000 - val_loss: 0.0242 - val_acc: 0.9942
Epoch 46/100
19969/19969 [==============================] - 1s 64us/step - loss: 5.0252e-05 - acc: 1.0000 - val_loss: 0.0241 - val_acc: 0.9942
Epoch 47/100
19969/19969 [==============================] - 1s 64us/step - loss: 4.4398e-05 - acc: 1.0000 - val_loss: 0.0239 - val_acc: 0.9940
Epoch 48/100
19969/19969 [==============================] - 1s 64us/step - loss: 4.5281e-05 - acc: 1.0000 - val_loss: 0.0245 - val_acc: 0.9944
Epoch 49/100
19969/19969 [==============================] - 1s 64us/step - loss: 4.3117e-05 - acc: 1.0000 - val_loss: 0.0276 - val_acc: 0.9940
Epoch 50/100
19969/19969 [==============================] - 1s 63us/step - loss: 4.5843e-05 - acc: 1.0000 - val_loss: 0.0250 - val_acc: 0.9940
Epoch 51/100
19969/19969 [==============================] - 1s 64us/step - loss: 4.0041e-05 - acc: 1.0000 - val_loss: 0.0247 - val_acc: 0.9944
Epoch 52/100
19969/19969 [==============================] - 1s 64us/step - loss: 3.7330e-05 - acc: 1.0000 - val_loss: 0.0243 - val_acc: 0.9936
Epoch 53/100
19969/19969 [==============================] - 1s 64us/step - loss: 3.6523e-05 - acc: 1.0000 - val_loss: 0.0241 - val_acc: 0.9936
Epoch 54/100
19969/19969 [==============================] - 1s 64us/step - loss: 4.0790e-05 - acc: 1.0000 - val_loss: 0.0247 - val_acc: 0.9940
Epoch 55/100
19969/19969 [==============================] - 1s 64us/step - loss: 3.3642e-05 - acc: 1.0000 - val_loss: 0.0254 - val_acc: 0.9944
Epoch 56/100
19969/19969 [==============================] - 1s 63us/step - loss: 3.4180e-05 - acc: 1.0000 - val_loss: 0.0251 - val_acc: 0.9942
Epoch 57/100
19969/19969 [==============================] - 1s 64us/step - loss: 3.3513e-05 - acc: 1.0000 - val_loss: 0.0250 - val_acc: 0.9938
Epoch 58/100
19969/19969 [==============================] - 1s 63us/step - loss: 3.3358e-05 - acc: 1.0000 - val_loss: 0.0255 - val_acc: 0.9940
Epoch 59/100
19969/19969 [==============================] - 1s 63us/step - loss: 3.0008e-05 - acc: 1.0000 - val_loss: 0.0250 - val_acc: 0.9940
Epoch 60/100
19969/19969 [==============================] - 1s 63us/step - loss: 3.0535e-05 - acc: 1.0000 - val_loss: 0.0262 - val_acc: 0.9942
Epoch 61/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.7876e-05 - acc: 1.0000 - val_loss: 0.0259 - val_acc: 0.9944
Epoch 62/100
19969/19969 [==============================] - 1s 63us/step - loss: 2.8096e-05 - acc: 1.0000 - val_loss: 0.0252 - val_acc: 0.9936
Epoch 63/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.7534e-05 - acc: 1.0000 - val_loss: 0.0252 - val_acc: 0.9938
Epoch 64/100
19969/19969 [==============================] - 1s 63us/step - loss: 2.8183e-05 - acc: 1.0000 - val_loss: 0.0257 - val_acc: 0.9938
Epoch 65/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.6087e-05 - acc: 1.0000 - val_loss: 0.0255 - val_acc: 0.9938
Epoch 66/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.5577e-05 - acc: 1.0000 - val_loss: 0.0259 - val_acc: 0.9936
Epoch 67/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.4779e-05 - acc: 1.0000 - val_loss: 0.0259 - val_acc: 0.9938
Epoch 68/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.4311e-05 - acc: 1.0000 - val_loss: 0.0264 - val_acc: 0.9936
Epoch 69/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.2835e-05 - acc: 1.0000 - val_loss: 0.0268 - val_acc: 0.9942
Epoch 70/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.4015e-05 - acc: 1.0000 - val_loss: 0.0267 - val_acc: 0.9940
Epoch 71/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.3208e-05 - acc: 1.0000 - val_loss: 0.0262 - val_acc: 0.9938
Epoch 72/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.2553e-05 - acc: 1.0000 - val_loss: 0.0263 - val_acc: 0.9938
Epoch 73/100
19969/19969 [==============================] - 1s 65us/step - loss: 2.2094e-05 - acc: 1.0000 - val_loss: 0.0268 - val_acc: 0.9940
Epoch 74/100
19969/19969 [==============================] - 2s 75us/step - loss: 2.1253e-05 - acc: 1.0000 - val_loss: 0.0269 - val_acc: 0.9942
Epoch 75/100
19969/19969 [==============================] - 1s 70us/step - loss: 2.0821e-05 - acc: 1.0000 - val_loss: 0.0261 - val_acc: 0.9938
Epoch 76/100
19969/19969 [==============================] - 1s 64us/step - loss: 2.1560e-05 - acc: 1.0000 - val_loss: 0.0267 - val_acc: 0.9942
Epoch 77/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.9617e-05 - acc: 1.0000 - val_loss: 0.0269 - val_acc: 0.9944
Epoch 78/100
19969/19969 [==============================] - 1s 63us/step - loss: 1.9711e-05 - acc: 1.0000 - val_loss: 0.0262 - val_acc: 0.9938
Epoch 79/100
19969/19969 [==============================] - 1s 63us/step - loss: 1.9578e-05 - acc: 1.0000 - val_loss: 0.0263 - val_acc: 0.9938
Epoch 80/100
19969/19969 [==============================] - 1s 63us/step - loss: 1.8993e-05 - acc: 1.0000 - val_loss: 0.0272 - val_acc: 0.9940
Epoch 81/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.8800e-05 - acc: 1.0000 - val_loss: 0.0272 - val_acc: 0.9940
Epoch 82/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.8519e-05 - acc: 1.0000 - val_loss: 0.0266 - val_acc: 0.9938
Epoch 83/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.7687e-05 - acc: 1.0000 - val_loss: 0.0281 - val_acc: 0.9946
Epoch 84/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.8054e-05 - acc: 1.0000 - val_loss: 0.0271 - val_acc: 0.9940
Epoch 85/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.7345e-05 - acc: 1.0000 - val_loss: 0.0270 - val_acc: 0.9940
Epoch 86/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.7695e-05 - acc: 1.0000 - val_loss: 0.0269 - val_acc: 0.9940
Epoch 87/100
19969/19969 [==============================] - 1s 63us/step - loss: 1.7075e-05 - acc: 1.0000 - val_loss: 0.0269 - val_acc: 0.9938
Epoch 88/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.6805e-05 - acc: 1.0000 - val_loss: 0.0271 - val_acc: 0.9938
Epoch 89/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.6151e-05 - acc: 1.0000 - val_loss: 0.0278 - val_acc: 0.9940
Epoch 90/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.6230e-05 - acc: 1.0000 - val_loss: 0.0272 - val_acc: 0.9940
Epoch 91/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.6125e-05 - acc: 1.0000 - val_loss: 0.0272 - val_acc: 0.9940
Epoch 92/100
19969/19969 [==============================] - 1s 63us/step - loss: 1.5638e-05 - acc: 1.0000 - val_loss: 0.0271 - val_acc: 0.9938
Epoch 93/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.5240e-05 - acc: 1.0000 - val_loss: 0.0278 - val_acc: 0.9942
Epoch 94/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.5491e-05 - acc: 1.0000 - val_loss: 0.0279 - val_acc: 0.9942
Epoch 95/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.4994e-05 - acc: 1.0000 - val_loss: 0.0276 - val_acc: 0.9940
Epoch 96/100
19969/19969 [==============================] - 1s 63us/step - loss: 1.5027e-05 - acc: 1.0000 - val_loss: 0.0273 - val_acc: 0.9940
Epoch 97/100
19969/19969 [==============================] - 1s 63us/step - loss: 1.4671e-05 - acc: 1.0000 - val_loss: 0.0274 - val_acc: 0.9940
Epoch 98/100
19969/19969 [==============================] - 1s 63us/step - loss: 1.4506e-05 - acc: 1.0000 - val_loss: 0.0280 - val_acc: 0.9940
Epoch 99/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.4285e-05 - acc: 1.0000 - val_loss: 0.0277 - val_acc: 0.9938
Epoch 100/100
19969/19969 [==============================] - 1s 64us/step - loss: 1.4262e-05 - acc: 1.0000 - val_loss: 0.0276 - val_acc: 0.9938
In [11]:
def plot_loss_and_accuracy(hist, skip=5):
    """Plot training/validation loss and accuracy curves side by side.

    Parameters
    ----------
    hist : keras ``History`` object returned by ``model.fit``.
    skip : int, optional
        Number of initial epochs to omit so the large early losses do
        not dominate the y-axis scale. Defaults to 5, matching the
        original hard-coded behaviour.
    """
    plt.figure().set_size_inches(18, 6)
    # Left panel: loss (blue = training, red = validation).
    plt.subplot(1, 2, 1)
    plt.plot(hist.history['loss'][skip:], 'b', label='train')
    plt.plot(hist.history['val_loss'][skip:], 'r', label='validation')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    # Right panel: accuracy (same colour convention).
    plt.subplot(1, 2, 2)
    plt.plot(hist.history['acc'][skip:], 'b', label='train')
    plt.plot(hist.history['val_acc'][skip:], 'r', label='validation')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.show()
In [14]:
# Visualize the first (no-dropout) model's training history.
plot_loss_and_accuracy(hist)

优化

首先,从验证集的Loss曲线(左图红色)可以看到呈上升趋势,说明模型过拟合了,需要防止过拟合,重新构建模型,增加Dropout层。

In [15]:
# Rebuild the classifier head with a Dropout layer to fight the
# overfitting seen in the previous run (rising validation loss).
# NOTE(review): X_train here holds pre-extracted bottleneck features —
# the model is just dropout + a single sigmoid unit (logistic regression).
input_tensor = Input(X_train.shape[1:])
x = input_tensor
# p = 0.25, still overfitting
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(input_tensor, x)
model.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
In [16]:
# Train for 100 epochs, holding out 20% of the training set for validation.
hist = model.fit(X_train, y_train, batch_size=64, epochs=100, validation_split=0.2)
Train on 19969 samples, validate on 4993 samples
Epoch 1/100
19969/19969 [==============================] - 1s 74us/step - loss: 0.0843 - acc: 0.9710 - val_loss: 0.0201 - val_acc: 0.9936
Epoch 2/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0233 - acc: 0.9924 - val_loss: 0.0154 - val_acc: 0.9948
Epoch 3/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0177 - acc: 0.9947 - val_loss: 0.0162 - val_acc: 0.9940
Epoch 4/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0168 - acc: 0.9949 - val_loss: 0.0156 - val_acc: 0.9950
Epoch 5/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0145 - acc: 0.9955 - val_loss: 0.0234 - val_acc: 0.9934
Epoch 6/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0133 - acc: 0.9962 - val_loss: 0.0145 - val_acc: 0.9954
Epoch 7/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0106 - acc: 0.9962 - val_loss: 0.0164 - val_acc: 0.9950
Epoch 8/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0109 - acc: 0.9969 - val_loss: 0.0207 - val_acc: 0.9934
Epoch 9/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0116 - acc: 0.9962 - val_loss: 0.0153 - val_acc: 0.9950
Epoch 10/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0102 - acc: 0.9967 - val_loss: 0.0152 - val_acc: 0.9948
Epoch 11/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0101 - acc: 0.9967 - val_loss: 0.0151 - val_acc: 0.9956
Epoch 12/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0083 - acc: 0.9980 - val_loss: 0.0274 - val_acc: 0.9918
Epoch 13/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0083 - acc: 0.9973 - val_loss: 0.0176 - val_acc: 0.9952
Epoch 14/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0087 - acc: 0.9971 - val_loss: 0.0161 - val_acc: 0.9946
Epoch 15/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0080 - acc: 0.9972 - val_loss: 0.0152 - val_acc: 0.9960
Epoch 16/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0085 - acc: 0.9971 - val_loss: 0.0164 - val_acc: 0.9946
Epoch 17/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0076 - acc: 0.9974 - val_loss: 0.0150 - val_acc: 0.9956
Epoch 18/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0074 - acc: 0.9975 - val_loss: 0.0175 - val_acc: 0.9948
Epoch 19/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0092 - acc: 0.9969 - val_loss: 0.0178 - val_acc: 0.9948
Epoch 20/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0068 - acc: 0.9978 - val_loss: 0.0156 - val_acc: 0.9954
Epoch 21/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0064 - acc: 0.9981 - val_loss: 0.0153 - val_acc: 0.9962
Epoch 22/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0060 - acc: 0.9979 - val_loss: 0.0153 - val_acc: 0.9958
Epoch 23/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0072 - acc: 0.9977 - val_loss: 0.0161 - val_acc: 0.9962
Epoch 24/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0083 - acc: 0.9977 - val_loss: 0.0190 - val_acc: 0.9950
Epoch 25/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0063 - acc: 0.9980 - val_loss: 0.0213 - val_acc: 0.9938
Epoch 26/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0063 - acc: 0.9977 - val_loss: 0.0164 - val_acc: 0.9958
Epoch 27/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0072 - acc: 0.9975 - val_loss: 0.0161 - val_acc: 0.9962
Epoch 28/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0060 - acc: 0.9982 - val_loss: 0.0164 - val_acc: 0.9950
Epoch 29/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0067 - acc: 0.9978 - val_loss: 0.0167 - val_acc: 0.9954
Epoch 30/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0074 - acc: 0.9977 - val_loss: 0.0177 - val_acc: 0.9952
Epoch 31/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0060 - acc: 0.9982 - val_loss: 0.0179 - val_acc: 0.9946
Epoch 32/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0050 - acc: 0.9983 - val_loss: 0.0163 - val_acc: 0.9956
Epoch 33/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0063 - acc: 0.9979 - val_loss: 0.0223 - val_acc: 0.9944
Epoch 34/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0059 - acc: 0.9978 - val_loss: 0.0163 - val_acc: 0.9960
Epoch 35/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0061 - acc: 0.9977 - val_loss: 0.0172 - val_acc: 0.9956
Epoch 36/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0051 - acc: 0.9985 - val_loss: 0.0184 - val_acc: 0.9954
Epoch 37/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0074 - acc: 0.9977 - val_loss: 0.0162 - val_acc: 0.9962
Epoch 38/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0056 - acc: 0.9981 - val_loss: 0.0166 - val_acc: 0.9956
Epoch 39/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0068 - acc: 0.9980 - val_loss: 0.0175 - val_acc: 0.9962
Epoch 40/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0065 - acc: 0.9979 - val_loss: 0.0189 - val_acc: 0.9950
Epoch 41/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0054 - acc: 0.9978 - val_loss: 0.0196 - val_acc: 0.9950
Epoch 42/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0063 - acc: 0.9976 - val_loss: 0.0182 - val_acc: 0.9950
Epoch 43/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0050 - acc: 0.9984 - val_loss: 0.0171 - val_acc: 0.9956
Epoch 44/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0072 - acc: 0.9973 - val_loss: 0.0175 - val_acc: 0.9956
Epoch 45/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0053 - acc: 0.9981 - val_loss: 0.0178 - val_acc: 0.9958
Epoch 46/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0059 - acc: 0.9982 - val_loss: 0.0209 - val_acc: 0.9946
Epoch 47/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0053 - acc: 0.9979 - val_loss: 0.0173 - val_acc: 0.9956
Epoch 48/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0048 - acc: 0.9982 - val_loss: 0.0179 - val_acc: 0.9960
Epoch 49/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0054 - acc: 0.9983 - val_loss: 0.0203 - val_acc: 0.9946
Epoch 50/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0063 - acc: 0.9977 - val_loss: 0.0178 - val_acc: 0.9956
Epoch 51/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0065 - acc: 0.9978 - val_loss: 0.0172 - val_acc: 0.9956
Epoch 52/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0048 - acc: 0.9985 - val_loss: 0.0226 - val_acc: 0.9948
Epoch 53/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0052 - acc: 0.9979 - val_loss: 0.0207 - val_acc: 0.9950
Epoch 54/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0053 - acc: 0.9982 - val_loss: 0.0210 - val_acc: 0.9950
Epoch 55/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0059 - acc: 0.9981 - val_loss: 0.0188 - val_acc: 0.9952
Epoch 56/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0055 - acc: 0.9981 - val_loss: 0.0174 - val_acc: 0.9956
Epoch 57/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0051 - acc: 0.9985 - val_loss: 0.0174 - val_acc: 0.9956
Epoch 58/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0042 - acc: 0.9984 - val_loss: 0.0176 - val_acc: 0.9952
Epoch 59/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0043 - acc: 0.9987 - val_loss: 0.0171 - val_acc: 0.9958
Epoch 60/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0069 - acc: 0.9977 - val_loss: 0.0209 - val_acc: 0.9950
Epoch 61/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0052 - acc: 0.9984 - val_loss: 0.0199 - val_acc: 0.9956
Epoch 62/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0048 - acc: 0.9985 - val_loss: 0.0193 - val_acc: 0.9954
Epoch 63/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0049 - acc: 0.9983 - val_loss: 0.0198 - val_acc: 0.9952
Epoch 64/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0050 - acc: 0.9983 - val_loss: 0.0202 - val_acc: 0.9956
Epoch 65/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0049 - acc: 0.9985 - val_loss: 0.0209 - val_acc: 0.9948
Epoch 66/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0043 - acc: 0.9987 - val_loss: 0.0217 - val_acc: 0.9950
Epoch 67/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0047 - acc: 0.9984 - val_loss: 0.0196 - val_acc: 0.9950
Epoch 68/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0051 - acc: 0.9984 - val_loss: 0.0217 - val_acc: 0.9950
Epoch 69/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0052 - acc: 0.9984 - val_loss: 0.0225 - val_acc: 0.9946
Epoch 70/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0044 - acc: 0.9982 - val_loss: 0.0199 - val_acc: 0.9956
Epoch 71/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0057 - acc: 0.9980 - val_loss: 0.0211 - val_acc: 0.9952
Epoch 72/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0048 - acc: 0.9984 - val_loss: 0.0204 - val_acc: 0.9954
Epoch 73/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0043 - acc: 0.9984 - val_loss: 0.0220 - val_acc: 0.9948
Epoch 74/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0060 - acc: 0.9979 - val_loss: 0.0208 - val_acc: 0.9954
Epoch 75/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0058 - acc: 0.9980 - val_loss: 0.0206 - val_acc: 0.9956
Epoch 76/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0053 - acc: 0.9984 - val_loss: 0.0239 - val_acc: 0.9950
Epoch 77/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0043 - acc: 0.9984 - val_loss: 0.0202 - val_acc: 0.9954
Epoch 78/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0058 - acc: 0.9983 - val_loss: 0.0242 - val_acc: 0.9948
Epoch 79/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0045 - acc: 0.9984 - val_loss: 0.0225 - val_acc: 0.9952
Epoch 80/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0056 - acc: 0.9981 - val_loss: 0.0233 - val_acc: 0.9950
Epoch 81/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0062 - acc: 0.9979 - val_loss: 0.0223 - val_acc: 0.9950
Epoch 82/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0033 - acc: 0.9990 - val_loss: 0.0211 - val_acc: 0.9952
Epoch 83/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0047 - acc: 0.9988 - val_loss: 0.0210 - val_acc: 0.9954
Epoch 84/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0049 - acc: 0.9983 - val_loss: 0.0250 - val_acc: 0.9946
Epoch 85/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0037 - acc: 0.9986 - val_loss: 0.0220 - val_acc: 0.9954
Epoch 86/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0050 - acc: 0.9984 - val_loss: 0.0208 - val_acc: 0.9956
Epoch 87/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0052 - acc: 0.9985 - val_loss: 0.0257 - val_acc: 0.9946
Epoch 88/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0034 - acc: 0.9989 - val_loss: 0.0212 - val_acc: 0.9956
Epoch 89/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0046 - acc: 0.9985 - val_loss: 0.0233 - val_acc: 0.9950
Epoch 90/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0050 - acc: 0.9984 - val_loss: 0.0220 - val_acc: 0.9952
Epoch 91/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0038 - acc: 0.9988 - val_loss: 0.0218 - val_acc: 0.9950
Epoch 92/100
19969/19969 [==============================] - 2s 77us/step - loss: 0.0033 - acc: 0.9990 - val_loss: 0.0217 - val_acc: 0.9952
Epoch 93/100
19969/19969 [==============================] - 1s 72us/step - loss: 0.0041 - acc: 0.9988 - val_loss: 0.0226 - val_acc: 0.9952
Epoch 94/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0058 - acc: 0.9983 - val_loss: 0.0217 - val_acc: 0.9956
Epoch 95/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0054 - acc: 0.9987 - val_loss: 0.0248 - val_acc: 0.9950
Epoch 96/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0055 - acc: 0.9984 - val_loss: 0.0260 - val_acc: 0.9948
Epoch 97/100
19969/19969 [==============================] - 1s 66us/step - loss: 0.0053 - acc: 0.9983 - val_loss: 0.0267 - val_acc: 0.9946
Epoch 98/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0045 - acc: 0.9987 - val_loss: 0.0225 - val_acc: 0.9952
Epoch 99/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0042 - acc: 0.9988 - val_loss: 0.0237 - val_acc: 0.9948
Epoch 100/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0060 - acc: 0.9981 - val_loss: 0.0249 - val_acc: 0.9946
In [17]:
# Visualize the dropout model's training history.
plot_loss_and_accuracy(hist)

验证集的Loss曲线出现明显的震荡,需要进一步调参:减少学习率

In [7]:
# Same dropout architecture as before, but with an explicit (smaller)
# learning rate to damp the oscillation seen in the validation-loss curve.
input_tensor = Input(X_train.shape[1:])
x = input_tensor
# p = 0.25, still overfitting
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(input_tensor, x)
# At epochs=100 the val loss was still decreasing; alpha=1e-3, 5e-3 and
# 1e-2 proved slightly too small, so the rate was increased a bit to 3e-2.
model.compile(optimizer=Adadelta(lr=3e-2), loss='binary_crossentropy', metrics=['accuracy'])
In [9]:
# Save the weights with the best validation loss seen so far to best_model.h5
# (save_best_only=True monitors val_loss by default).
check_pointer = ModelCheckpoint(filepath='best_model.h5', verbose=1, save_best_only=True)
hist = model.fit(X_train, y_train, batch_size=64, epochs=100, validation_split=0.2, callbacks=[check_pointer], verbose=1)
Train on 19969 samples, validate on 4993 samples
Epoch 1/100
19969/19969 [==============================] - 1s 70us/step - loss: 0.0317 - acc: 0.9888 - val_loss: 0.0206 - val_acc: 0.9922

Epoch 00001: val_loss improved from inf to 0.02060, saving model to best_model.h5
Epoch 2/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0308 - acc: 0.9903 - val_loss: 0.0197 - val_acc: 0.9932

Epoch 00002: val_loss improved from 0.02060 to 0.01966, saving model to best_model.h5
Epoch 3/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0280 - acc: 0.9905 - val_loss: 0.0190 - val_acc: 0.9936

Epoch 00003: val_loss improved from 0.01966 to 0.01901, saving model to best_model.h5
Epoch 4/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0273 - acc: 0.9908 - val_loss: 0.0188 - val_acc: 0.9932

Epoch 00004: val_loss improved from 0.01901 to 0.01875, saving model to best_model.h5
Epoch 5/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0265 - acc: 0.9913 - val_loss: 0.0184 - val_acc: 0.9932

Epoch 00005: val_loss improved from 0.01875 to 0.01843, saving model to best_model.h5
Epoch 6/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0241 - acc: 0.9918 - val_loss: 0.0189 - val_acc: 0.9928

Epoch 00006: val_loss did not improve
Epoch 7/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0258 - acc: 0.9917 - val_loss: 0.0177 - val_acc: 0.9934

Epoch 00007: val_loss improved from 0.01843 to 0.01774, saving model to best_model.h5
Epoch 8/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0251 - acc: 0.9920 - val_loss: 0.0177 - val_acc: 0.9932

Epoch 00008: val_loss improved from 0.01774 to 0.01770, saving model to best_model.h5
Epoch 9/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0253 - acc: 0.9919 - val_loss: 0.0175 - val_acc: 0.9934

Epoch 00009: val_loss improved from 0.01770 to 0.01749, saving model to best_model.h5
Epoch 10/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0233 - acc: 0.9924 - val_loss: 0.0173 - val_acc: 0.9938

Epoch 00010: val_loss improved from 0.01749 to 0.01729, saving model to best_model.h5
Epoch 11/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0226 - acc: 0.9936 - val_loss: 0.0171 - val_acc: 0.9936

Epoch 00011: val_loss improved from 0.01729 to 0.01706, saving model to best_model.h5
Epoch 12/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0220 - acc: 0.9932 - val_loss: 0.0167 - val_acc: 0.9934

Epoch 00012: val_loss improved from 0.01706 to 0.01666, saving model to best_model.h5
Epoch 13/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0226 - acc: 0.9930 - val_loss: 0.0168 - val_acc: 0.9936

Epoch 00013: val_loss did not improve
Epoch 14/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0213 - acc: 0.9933 - val_loss: 0.0170 - val_acc: 0.9932

Epoch 00014: val_loss did not improve
Epoch 15/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0216 - acc: 0.9932 - val_loss: 0.0168 - val_acc: 0.9936

Epoch 00015: val_loss did not improve
Epoch 16/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0219 - acc: 0.9933 - val_loss: 0.0162 - val_acc: 0.9932

Epoch 00016: val_loss improved from 0.01666 to 0.01624, saving model to best_model.h5
Epoch 17/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0205 - acc: 0.9936 - val_loss: 0.0171 - val_acc: 0.9940

Epoch 00017: val_loss did not improve
Epoch 18/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0204 - acc: 0.9935 - val_loss: 0.0161 - val_acc: 0.9942

Epoch 00018: val_loss improved from 0.01624 to 0.01609, saving model to best_model.h5
Epoch 19/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0197 - acc: 0.9938 - val_loss: 0.0164 - val_acc: 0.9938

Epoch 00019: val_loss did not improve
Epoch 20/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0198 - acc: 0.9942 - val_loss: 0.0160 - val_acc: 0.9942

Epoch 00020: val_loss improved from 0.01609 to 0.01605, saving model to best_model.h5
Epoch 21/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0192 - acc: 0.9937 - val_loss: 0.0160 - val_acc: 0.9940

Epoch 00021: val_loss improved from 0.01605 to 0.01600, saving model to best_model.h5
Epoch 22/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0183 - acc: 0.9941 - val_loss: 0.0158 - val_acc: 0.9938

Epoch 00022: val_loss improved from 0.01600 to 0.01583, saving model to best_model.h5
Epoch 23/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0196 - acc: 0.9941 - val_loss: 0.0157 - val_acc: 0.9940

Epoch 00023: val_loss improved from 0.01583 to 0.01572, saving model to best_model.h5
Epoch 24/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0186 - acc: 0.9947 - val_loss: 0.0157 - val_acc: 0.9940

Epoch 00024: val_loss improved from 0.01572 to 0.01568, saving model to best_model.h5
Epoch 25/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0194 - acc: 0.9941 - val_loss: 0.0160 - val_acc: 0.9940

Epoch 00025: val_loss did not improve
Epoch 26/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0190 - acc: 0.9940 - val_loss: 0.0159 - val_acc: 0.9940

Epoch 00026: val_loss did not improve
Epoch 27/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0173 - acc: 0.9949 - val_loss: 0.0152 - val_acc: 0.9942

Epoch 00027: val_loss improved from 0.01568 to 0.01522, saving model to best_model.h5
Epoch 28/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0172 - acc: 0.9945 - val_loss: 0.0155 - val_acc: 0.9942

Epoch 00028: val_loss did not improve
Epoch 29/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0162 - acc: 0.9950 - val_loss: 0.0149 - val_acc: 0.9952

Epoch 00029: val_loss improved from 0.01522 to 0.01486, saving model to best_model.h5
Epoch 30/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0179 - acc: 0.9949 - val_loss: 0.0151 - val_acc: 0.9944

Epoch 00030: val_loss did not improve
Epoch 31/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0159 - acc: 0.9948 - val_loss: 0.0151 - val_acc: 0.9944

Epoch 00031: val_loss did not improve
Epoch 32/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0159 - acc: 0.9950 - val_loss: 0.0158 - val_acc: 0.9942

Epoch 00032: val_loss did not improve
Epoch 33/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0166 - acc: 0.9946 - val_loss: 0.0151 - val_acc: 0.9944

Epoch 00033: val_loss did not improve
Epoch 34/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0158 - acc: 0.9951 - val_loss: 0.0151 - val_acc: 0.9944

Epoch 00034: val_loss did not improve
Epoch 35/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0163 - acc: 0.9953 - val_loss: 0.0151 - val_acc: 0.9944

Epoch 00035: val_loss did not improve
Epoch 36/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0171 - acc: 0.9951 - val_loss: 0.0150 - val_acc: 0.9944

Epoch 00036: val_loss did not improve
Epoch 37/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0166 - acc: 0.9948 - val_loss: 0.0148 - val_acc: 0.9946

Epoch 00037: val_loss improved from 0.01486 to 0.01477, saving model to best_model.h5
Epoch 38/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0159 - acc: 0.9952 - val_loss: 0.0146 - val_acc: 0.9948

Epoch 00038: val_loss improved from 0.01477 to 0.01459, saving model to best_model.h5
Epoch 39/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0165 - acc: 0.9945 - val_loss: 0.0147 - val_acc: 0.9948

Epoch 00039: val_loss did not improve
Epoch 40/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0158 - acc: 0.9948 - val_loss: 0.0149 - val_acc: 0.9944

Epoch 00040: val_loss did not improve
Epoch 41/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0147 - acc: 0.9953 - val_loss: 0.0148 - val_acc: 0.9946

Epoch 00041: val_loss did not improve
Epoch 42/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0137 - acc: 0.9955 - val_loss: 0.0146 - val_acc: 0.9948

Epoch 00042: val_loss did not improve
Epoch 43/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0150 - acc: 0.9953 - val_loss: 0.0146 - val_acc: 0.9948

Epoch 00043: val_loss did not improve
Epoch 44/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0140 - acc: 0.9956 - val_loss: 0.0144 - val_acc: 0.9950

Epoch 00044: val_loss improved from 0.01459 to 0.01439, saving model to best_model.h5
Epoch 45/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0150 - acc: 0.9958 - val_loss: 0.0146 - val_acc: 0.9944

Epoch 00045: val_loss did not improve
Epoch 46/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0143 - acc: 0.9960 - val_loss: 0.0144 - val_acc: 0.9950

Epoch 00046: val_loss did not improve
Epoch 47/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0145 - acc: 0.9958 - val_loss: 0.0149 - val_acc: 0.9946

Epoch 00047: val_loss did not improve
Epoch 48/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0134 - acc: 0.9954 - val_loss: 0.0149 - val_acc: 0.9946

Epoch 00048: val_loss did not improve
Epoch 49/100
19969/19969 [==============================] - 1s 70us/step - loss: 0.0139 - acc: 0.9957 - val_loss: 0.0142 - val_acc: 0.9950

Epoch 00049: val_loss improved from 0.01439 to 0.01416, saving model to best_model.h5
Epoch 50/100
19969/19969 [==============================] - 1s 70us/step - loss: 0.0146 - acc: 0.9953 - val_loss: 0.0145 - val_acc: 0.9946

Epoch 00050: val_loss did not improve
Epoch 51/100
19969/19969 [==============================] - 1s 70us/step - loss: 0.0139 - acc: 0.9956 - val_loss: 0.0144 - val_acc: 0.9950

Epoch 00051: val_loss did not improve
Epoch 52/100
19969/19969 [==============================] - 1s 71us/step - loss: 0.0133 - acc: 0.9959 - val_loss: 0.0148 - val_acc: 0.9946

Epoch 00052: val_loss did not improve
Epoch 53/100
19969/19969 [==============================] - 1s 74us/step - loss: 0.0141 - acc: 0.9956 - val_loss: 0.0139 - val_acc: 0.9952

Epoch 00053: val_loss improved from 0.01416 to 0.01388, saving model to best_model.h5
Epoch 54/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0139 - acc: 0.9959 - val_loss: 0.0139 - val_acc: 0.9952

Epoch 00054: val_loss did not improve
Epoch 55/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0147 - acc: 0.9956 - val_loss: 0.0142 - val_acc: 0.9952

Epoch 00055: val_loss did not improve
Epoch 56/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0129 - acc: 0.9964 - val_loss: 0.0139 - val_acc: 0.9952

Epoch 00056: val_loss did not improve
Epoch 57/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0150 - acc: 0.9956 - val_loss: 0.0139 - val_acc: 0.9950

Epoch 00057: val_loss improved from 0.01388 to 0.01387, saving model to best_model.h5
Epoch 58/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0136 - acc: 0.9958 - val_loss: 0.0142 - val_acc: 0.9952

Epoch 00058: val_loss did not improve
Epoch 59/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0137 - acc: 0.9961 - val_loss: 0.0141 - val_acc: 0.9952

Epoch 00059: val_loss did not improve
Epoch 60/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0132 - acc: 0.9958 - val_loss: 0.0139 - val_acc: 0.9950

Epoch 00060: val_loss did not improve
Epoch 61/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0123 - acc: 0.9962 - val_loss: 0.0141 - val_acc: 0.9950

Epoch 00061: val_loss did not improve
Epoch 62/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0140 - acc: 0.9956 - val_loss: 0.0139 - val_acc: 0.9950

Epoch 00062: val_loss did not improve
Epoch 63/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0124 - acc: 0.9961 - val_loss: 0.0140 - val_acc: 0.9950

Epoch 00063: val_loss did not improve
Epoch 64/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0127 - acc: 0.9961 - val_loss: 0.0146 - val_acc: 0.9950

Epoch 00064: val_loss did not improve
Epoch 65/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0126 - acc: 0.9965 - val_loss: 0.0144 - val_acc: 0.9952

Epoch 00065: val_loss did not improve
Epoch 66/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0124 - acc: 0.9964 - val_loss: 0.0138 - val_acc: 0.9950

Epoch 00066: val_loss improved from 0.01387 to 0.01383, saving model to best_model.h5
Epoch 67/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0126 - acc: 0.9959 - val_loss: 0.0140 - val_acc: 0.9954

Epoch 00067: val_loss did not improve
Epoch 68/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0135 - acc: 0.9957 - val_loss: 0.0139 - val_acc: 0.9950

Epoch 00068: val_loss did not improve
Epoch 69/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0133 - acc: 0.9961 - val_loss: 0.0138 - val_acc: 0.9950

Epoch 00069: val_loss improved from 0.01383 to 0.01378, saving model to best_model.h5
Epoch 70/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0133 - acc: 0.9959 - val_loss: 0.0140 - val_acc: 0.9952

Epoch 00070: val_loss did not improve
Epoch 71/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0121 - acc: 0.9963 - val_loss: 0.0140 - val_acc: 0.9952

Epoch 00071: val_loss did not improve
Epoch 72/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0114 - acc: 0.9967 - val_loss: 0.0137 - val_acc: 0.9952

Epoch 00072: val_loss improved from 0.01378 to 0.01366, saving model to best_model.h5
Epoch 73/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0124 - acc: 0.9963 - val_loss: 0.0137 - val_acc: 0.9950

Epoch 00073: val_loss did not improve
Epoch 74/100
19969/19969 [==============================] - 1s 67us/step - loss: 0.0115 - acc: 0.9963 - val_loss: 0.0141 - val_acc: 0.9954

Epoch 00074: val_loss did not improve
Epoch 75/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0120 - acc: 0.9964 - val_loss: 0.0137 - val_acc: 0.9952

Epoch 00075: val_loss did not improve
Epoch 76/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0123 - acc: 0.9959 - val_loss: 0.0141 - val_acc: 0.9954

Epoch 00076: val_loss did not improve
Epoch 77/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0115 - acc: 0.9965 - val_loss: 0.0142 - val_acc: 0.9954

Epoch 00077: val_loss did not improve
Epoch 78/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0125 - acc: 0.9963 - val_loss: 0.0142 - val_acc: 0.9954

Epoch 00078: val_loss did not improve
Epoch 79/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0110 - acc: 0.9969 - val_loss: 0.0140 - val_acc: 0.9954

Epoch 00079: val_loss did not improve
Epoch 80/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0119 - acc: 0.9963 - val_loss: 0.0138 - val_acc: 0.9952

Epoch 00080: val_loss did not improve
Epoch 81/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0120 - acc: 0.9958 - val_loss: 0.0136 - val_acc: 0.9952

Epoch 00081: val_loss improved from 0.01366 to 0.01364, saving model to best_model.h5
Epoch 82/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0111 - acc: 0.9966 - val_loss: 0.0141 - val_acc: 0.9954

Epoch 00082: val_loss did not improve
Epoch 83/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0113 - acc: 0.9965 - val_loss: 0.0138 - val_acc: 0.9952

Epoch 00083: val_loss did not improve
Epoch 84/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0117 - acc: 0.9965 - val_loss: 0.0137 - val_acc: 0.9952

Epoch 00084: val_loss did not improve
Epoch 85/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0122 - acc: 0.9964 - val_loss: 0.0139 - val_acc: 0.9952

Epoch 00085: val_loss did not improve
Epoch 86/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0121 - acc: 0.9961 - val_loss: 0.0141 - val_acc: 0.9954

Epoch 00086: val_loss did not improve
Epoch 87/100
19969/19969 [==============================] - 1s 71us/step - loss: 0.0118 - acc: 0.9962 - val_loss: 0.0140 - val_acc: 0.9954

Epoch 00087: val_loss did not improve
Epoch 88/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0112 - acc: 0.9966 - val_loss: 0.0136 - val_acc: 0.9952

Epoch 00088: val_loss improved from 0.01364 to 0.01362, saving model to best_model.h5
Epoch 89/100
19969/19969 [==============================] - 1s 70us/step - loss: 0.0110 - acc: 0.9967 - val_loss: 0.0134 - val_acc: 0.9954

Epoch 00089: val_loss improved from 0.01362 to 0.01343, saving model to best_model.h5
Epoch 90/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0118 - acc: 0.9964 - val_loss: 0.0136 - val_acc: 0.9952

Epoch 00090: val_loss did not improve
Epoch 91/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0112 - acc: 0.9964 - val_loss: 0.0140 - val_acc: 0.9956

Epoch 00091: val_loss did not improve
Epoch 92/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0108 - acc: 0.9970 - val_loss: 0.0135 - val_acc: 0.9954

Epoch 00092: val_loss did not improve
Epoch 93/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0103 - acc: 0.9968 - val_loss: 0.0135 - val_acc: 0.9954

Epoch 00093: val_loss did not improve
Epoch 94/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0115 - acc: 0.9963 - val_loss: 0.0141 - val_acc: 0.9954

Epoch 00094: val_loss did not improve
Epoch 95/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0104 - acc: 0.9968 - val_loss: 0.0134 - val_acc: 0.9952

Epoch 00095: val_loss improved from 0.01343 to 0.01336, saving model to best_model.h5
Epoch 96/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0112 - acc: 0.9966 - val_loss: 0.0134 - val_acc: 0.9952

Epoch 00096: val_loss did not improve
Epoch 97/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0106 - acc: 0.9969 - val_loss: 0.0137 - val_acc: 0.9952

Epoch 00097: val_loss did not improve
Epoch 98/100
19969/19969 [==============================] - 1s 69us/step - loss: 0.0107 - acc: 0.9970 - val_loss: 0.0140 - val_acc: 0.9956

Epoch 00098: val_loss did not improve
Epoch 99/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0111 - acc: 0.9964 - val_loss: 0.0137 - val_acc: 0.9952

Epoch 00099: val_loss did not improve
Epoch 100/100
19969/19969 [==============================] - 1s 68us/step - loss: 0.0116 - acc: 0.9958 - val_loss: 0.0142 - val_acc: 0.9954

Epoch 00100: val_loss did not improve
In [12]:
# Plot the training/validation loss and accuracy curves recorded in the
# Keras History object `hist` (helper `plot_loss_and_accuracy` is defined
# earlier in this notebook).
plot_loss_and_accuracy(hist)

预测测试集

In [13]:
# Restore the checkpointed weights with the lowest val_loss (saved as
# 'best_model.h5' during training) instead of using the final epoch's weights.
model.load_weights('best_model.h5')
In [14]:
# Predict dog-probabilities for the test set, then bound every prediction
# away from exact 0/1 so a single confidently-wrong answer cannot blow up
# the Kaggle log-loss score.
y_pred = model.predict(X_test, verbose=1).clip(min=0.005, max=0.995)
12500/12500 [==============================] - 1s 46us/step
In [15]:
# Build the Kaggle submission file: map each test image's numeric id to the
# predicted dog-probability computed above.
df = pd.read_csv('sample_submission.csv')

# Re-create the (unshuffled) test generator only to recover the filename
# order in which y_pred was produced.
gen = ImageDataGenerator()
test_generator = gen.flow_from_directory('./tests', (224, 224), shuffle=False, batch_size=16, class_mode=None)

for i, fname in enumerate(test_generator.filenames):
    # Filenames look like 'subdir/123.jpg'; extract the numeric id between
    # the last '/' and the extension.
    index = int(fname[fname.rfind('/')+1:fname.rfind('.')])
    # .at replaces the deprecated DataFrame.set_value (removed in pandas 1.0);
    # float() stores a scalar rather than a length-1 ndarray in the cell.
    df.at[index - 1, 'label'] = float(y_pred[i])

df.to_csv('pred.csv', index=None)
df.head(10)
Found 12500 images belonging to 1 classes.
/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ipykernel_launcher.py:7: FutureWarning: set_value is deprecated and will be removed in a future release. Please use .at[] or .iat[] accessors instead
  import sys
Out[15]:
id label
0 1 0.995
1 2 0.995
2 3 0.995
3 4 0.995
4 5 0.005
5 6 0.005
6 7 0.005
7 8 0.005
8 9 0.005
9 10 0.005